}
/* System is really put into sleep state by this stub */
-acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
+acpi_status acpi_enter_sleep_state(u8 sleep_state)
{
acpi_status status;
* Called directly before VMRUN. Checks if the VCPU needs a new ASID,
* assigns it, and issues any required TLB flushes.
*/
-asmlinkage void svm_asid_handle_vmrun(void)
+void svm_asid_handle_vmrun(void)
{
struct vcpu *curr = current;
struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
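The ASID handling the comment above describes is the usual generation-based scheme: a vCPU keeps its ASID until the per-CPU ASID space is exhausted, at which point the generation counter is bumped, the space is recycled, and a TLB flush becomes necessary. A minimal sketch of that scheme, with invented names (asid_core, asid_refresh) that are not Xen's:

#include <stdbool.h>
#include <stdint.h>

struct asid_core { uint64_t generation; uint32_t next_asid, max_asid; };
struct asid_vcpu { uint64_t generation; uint32_t asid; };

/* Returns true if the caller must flush the TLB before entering the guest. */
static bool asid_refresh(struct asid_core *c, struct asid_vcpu *v)
{
    bool need_flush = false;

    if ( v->generation == c->generation )
        return false;               /* the current ASID is still valid */

    if ( c->next_asid > c->max_asid )
    {
        c->generation++;            /* recycle the whole ASID space */
        c->next_asid = 1;
        need_flush = true;          /* stale translations must be flushed */
    }

    v->generation = c->generation;
    v->asid = c->next_asid++;
    return need_flush;
}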
vmcb, general1_intercepts | GENERAL1_INTERCEPT_VINTR);
}
-asmlinkage void svm_intr_assist(void)
+void svm_intr_assist(void)
{
struct vcpu *v = current;
struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
}
/* VCPU switch */
-asmlinkage void nsvm_vcpu_switch(struct cpu_user_regs *regs)
+void nsvm_vcpu_switch(struct cpu_user_regs *regs)
{
struct vcpu *v = current;
struct nestedvcpu *nv;
.nhvm_intr_blocked = nsvm_intr_blocked,
};
-asmlinkage void svm_vmexit_handler(struct cpu_user_regs *regs)
+void svm_vmexit_handler(struct cpu_user_regs *regs)
{
uint64_t exit_reason;
struct vcpu *v = current;
vmcb_set_vintr(vmcb, intr);
}
-asmlinkage void svm_trace_vmentry(void)
+void svm_trace_vmentry(void)
{
struct vcpu *curr = current;
HVMTRACE_ND(VMENTRY,
return 0;
}
-asmlinkage void vmx_intr_assist(void)
+void vmx_intr_assist(void)
{
struct hvm_intack intack;
struct vcpu *v = current;
domain_crash(curr->domain);
}
-asmlinkage void vmx_enter_realmode(struct cpu_user_regs *regs)
+void vmx_enter_realmode(struct cpu_user_regs *regs)
{
struct vcpu *v = current;
}
}
-asmlinkage void vmx_vmexit_handler(struct cpu_user_regs *regs)
+void vmx_vmexit_handler(struct cpu_user_regs *regs)
{
unsigned int exit_reason, idtv_info, intr_info = 0, vector = 0;
unsigned long exit_qualification, inst_len = 0;
nvmx_idtv_handling();
}
-asmlinkage void vmx_vmenter_helper(void)
+void vmx_vmenter_helper(void)
{
struct vcpu *curr = current;
u32 new_asid, old_asid;
vmreturn(regs, VMSUCCEED);
}
-asmlinkage void nvmx_switch_guest(void)
+void nvmx_switch_guest(void)
{
struct vcpu *v = current;
struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
DEFINE_PER_CPU(unsigned int, irq_count);
-asmlinkage void do_IRQ(struct cpu_user_regs *regs)
+void do_IRQ(struct cpu_user_regs *regs)
{
struct irqaction *action;
uint32_t tsc_in;
#define TRC_64_FLAG 0
#endif
-asmlinkage void trace_hypercall(void)
+void trace_hypercall(void)
{
struct cpu_user_regs *regs = guest_cpu_user_regs();
* are disabled). In such situations we can't do much that is safe. We try to
* print out some tracing and then we just spin.
*/
-asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs)
+void fatal_trap(int trapnr, struct cpu_user_regs *regs)
{
static DEFINE_PER_CPU(char, depth);
* Called from asm to set up the MCE trapbounce info.
* Returns 0 if no callback is set up, else 1.
*/
-asmlinkage int set_guest_machinecheck_trapbounce(void)
+int set_guest_machinecheck_trapbounce(void)
{
struct vcpu *v = current;
struct trap_bounce *tb = &v->arch.pv_vcpu.trap_bounce;
* Called from asm to set up the NMI trapbounce info.
* Returns 0 if no callback is set up, else 1.
*/
-asmlinkage int set_guest_nmi_trapbounce(void)
+int set_guest_nmi_trapbounce(void)
{
struct vcpu *v = current;
struct trap_bounce *tb = &v->arch.pv_vcpu.trap_bounce;
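The 0/1 convention described in the two comments above (MCE and NMI) is consumed by the assembly exit path. Purely as an illustration of that contract, and not of the actual assembly flow, a hypothetical C caller would look like:

/* Hypothetical caller; only the meaning of the return value is real. */
static void maybe_bounce_nmi_to_guest(void)
{
    if ( set_guest_nmi_trapbounce() )
    {
        /* 1: a guest callback is registered and the trap_bounce info has
         * been set up, so a bounce frame can now be constructed. */
    }
    else
    {
        /* 0: no callback is registered; nothing is delivered. */
    }
}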
}
#define DO_ERROR_NOCODE(trapnr, name) \
-asmlinkage void do_##name(struct cpu_user_regs *regs) \
+void do_##name(struct cpu_user_regs *regs) \
{ \
do_trap(trapnr, regs, 0); \
}
#define DO_ERROR(trapnr, name) \
-asmlinkage void do_##name(struct cpu_user_regs *regs) \
+void do_##name(struct cpu_user_regs *regs) \
{ \
do_trap(trapnr, regs, 1); \
}
return EXCRET_fault_fixed;
}
-asmlinkage void do_invalid_op(struct cpu_user_regs *regs)
+void do_invalid_op(struct cpu_user_regs *regs)
{
struct bug_frame bug;
struct bug_frame_str bug_str;
panic("FATAL TRAP: vector = %d (invalid opcode)\n", TRAP_invalid_op);
}
-asmlinkage void do_int3(struct cpu_user_regs *regs)
+void do_int3(struct cpu_user_regs *regs)
{
DEBUGGER_trap_entry(TRAP_int3, regs);
do_guest_trap(TRAP_int3, regs, 0);
}
-asmlinkage void do_machine_check(struct cpu_user_regs *regs)
+void do_machine_check(struct cpu_user_regs *regs)
{
machine_check_vector(regs, regs->error_code);
}
* Bit 3: Reserved bit violation
* Bit 4: Instruction fetch
*/
-asmlinkage void do_page_fault(struct cpu_user_regs *regs)
+void do_page_fault(struct cpu_user_regs *regs)
{
unsigned long addr, fixup;
unsigned int error_code;
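The bit numbers in the comment above are the architectural x86 page-fault error code bits. A small decoding sketch; the PF_* names are illustrative (Xen's own constants are the PFEC_* family):

#define PF_PRESENT (1u << 0)   /* Bit 0: fault on a present page (protection) */
#define PF_WRITE   (1u << 1)   /* Bit 1: write access (clear means read) */
#define PF_USER    (1u << 2)   /* Bit 2: fault taken while CPL == 3 */
#define PF_RSVD    (1u << 3)   /* Bit 3: reserved bit violation */
#define PF_INSN    (1u << 4)   /* Bit 4: instruction fetch */

static inline int pf_is_instruction_fetch(unsigned int error_code)
{
    return (error_code & PF_INSN) != 0;
}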
* during early boot (an issue was seen once, but was most likely a hardware
* problem).
*/
-asmlinkage void __init do_early_page_fault(struct cpu_user_regs *regs)
+void __init do_early_page_fault(struct cpu_user_regs *regs)
{
static int stuck;
static unsigned long prev_eip, prev_cr2;
#endif
}
-asmlinkage void do_general_protection(struct cpu_user_regs *regs)
+void do_general_protection(struct cpu_user_regs *regs)
{
struct vcpu *v = current;
unsigned long fixup;
static nmi_callback_t nmi_callback = dummy_nmi_callback;
-asmlinkage void do_nmi(struct cpu_user_regs *regs)
+void do_nmi(struct cpu_user_regs *regs)
{
unsigned int cpu = smp_processor_id();
unsigned char reason;
nmi_callback = dummy_nmi_callback;
}
-asmlinkage void do_device_not_available(struct cpu_user_regs *regs)
+void do_device_not_available(struct cpu_user_regs *regs)
{
struct vcpu *curr = current;
wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl | 1);
}
-asmlinkage void do_debug(struct cpu_user_regs *regs)
+void do_debug(struct cpu_user_regs *regs)
{
struct vcpu *v = current;
return;
}
-asmlinkage void do_spurious_interrupt_bug(struct cpu_user_regs *regs)
+void do_spurious_interrupt_bug(struct cpu_user_regs *regs)
{
}
.notifier_call = cpu_doublefault_tss_callback
};
-asmlinkage void do_double_fault(void)
+void do_double_fault(void)
{
struct tss_struct *tss;
unsigned int cpu;
l1_table_offset(addr), l1e_get_intpte(l1e), pfn);
}
-asmlinkage void double_fault(void);
-asmlinkage void do_double_fault(struct cpu_user_regs *regs)
+void double_fault(void);
+void do_double_fault(struct cpu_user_regs *regs)
{
unsigned int cpu;
extern unsigned char boot_edid_info[128];
#endif
+#define asmlinkage
+
#if defined(__x86_64__)
#define CONFIG_X86_64 1
#define CONFIG_COMPAT 1
-#define asmlinkage
-
#define PML4_ENTRY_BITS 39
#ifndef __ASSEMBLY__
#define PML4_ENTRY_BYTES (1UL << PML4_ENTRY_BITS)
#define CONFIG_X86_32 1
#define CONFIG_DOMAIN_PAGE 1
-#define asmlinkage __attribute__((regparm(0)))
-
/*
* Memory layout (high to low): PAE-SIZE
* ------
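For context on the hunk above: the x86_32 definition being removed expanded to __attribute__((regparm(0))), which keeps every argument on the stack so that hand-written 32-bit assembly can call the function without knowing GCC's register-argument conventions; on x86_64 the SysV ABI already fixes the argument registers, so the macro was empty there. The example below is illustrative only (old_asmlinkage and example_trap_handler are made-up names) and is only meaningful when compiling 32-bit x86 code:

struct cpu_user_regs;                    /* forward declaration for the sketch */

/* What the removed x86_32 definition amounted to: no register arguments. */
#define old_asmlinkage __attribute__((regparm(0)))

old_asmlinkage void example_trap_handler(struct cpu_user_regs *regs);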
void nvmx_update_secondary_exec_control(struct vcpu *v,
unsigned long value);
void nvmx_update_exception_bitmap(struct vcpu *v, unsigned long value);
-asmlinkage void nvmx_switch_guest(void);
+void nvmx_switch_guest(void);
void nvmx_idtv_handling(void);
u64 nvmx_get_tsc_offset(struct vcpu *v);
int nvmx_n2_vmexit_handler(struct cpu_user_regs *regs,
fastcall void smp_cmci_interrupt(struct cpu_user_regs *regs);
fastcall void smp_irq_move_cleanup_interrupt(struct cpu_user_regs *regs);
-asmlinkage void do_IRQ(struct cpu_user_regs *regs);
+void do_IRQ(struct cpu_user_regs *regs);
void disable_8259A_irq(struct irq_desc *);
void enable_8259A_irq(struct irq_desc *);
void show_execution_state(struct cpu_user_regs *regs);
#define dump_execution_state() run_in_exception_handler(show_execution_state)
void show_page_walk(unsigned long addr);
-asmlinkage void fatal_trap(int trapnr, struct cpu_user_regs *regs);
+void fatal_trap(int trapnr, struct cpu_user_regs *regs);
#ifdef CONFIG_COMPAT
void compat_show_guest_stack(struct vcpu *, struct cpu_user_regs *, int lines);
void mcheck_init(struct cpuinfo_x86 *c, bool_t bsp);
#define DECLARE_TRAP_HANDLER(_name) \
-asmlinkage void _name(void); \
-asmlinkage void do_ ## _name(struct cpu_user_regs *regs)
+void _name(void); \
+void do_ ## _name(struct cpu_user_regs *regs)
DECLARE_TRAP_HANDLER(divide_error);
DECLARE_TRAP_HANDLER(debug);
DECLARE_TRAP_HANDLER(nmi);
DECLARE_TRAP_HANDLER(spurious_interrupt_bug);
#undef DECLARE_TRAP_HANDLER
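With the definition above, each DECLARE_TRAP_HANDLER use now expands to two plain prototypes, one for the assembly entry stub and one for the C handler; for instance, DECLARE_TRAP_HANDLER(debug); yields:

struct cpu_user_regs;                       /* already visible in the real header */

void debug(void);                           /* asm entry stub */
void do_debug(struct cpu_user_regs *regs);  /* C-level handler */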
-asmlinkage void syscall_enter(void);
-asmlinkage void sysenter_entry(void);
-asmlinkage void sysenter_eflags_saved(void);
-asmlinkage void compat_hypercall(void);
-asmlinkage void int80_direct_trap(void);
+void syscall_enter(void);
+void sysenter_entry(void);
+void sysenter_eflags_saved(void);
+void compat_hypercall(void);
+void int80_direct_trap(void);
-extern asmlinkage int hypercall(void);
+extern int hypercall(void);
int cpuid_hypervisor_leaves( uint32_t idx, uint32_t sub_idx,
uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);
#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
#define BUILD_IRQ(nr) \
-asmlinkage void IRQ_NAME(nr); \
+void IRQ_NAME(nr); \
__asm__( \
"\n"__ALIGN_STR"\n" \
STR(IRQ) #nr "_interrupt:\n\t" \
#define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
#define BUILD_IRQ(nr) \
-asmlinkage void IRQ_NAME(nr); \
+void IRQ_NAME(nr); \
__asm__( \
"\n"__ALIGN_STR"\n" \
STR(IRQ) #nr "_interrupt:\n\t" \
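Similarly, BUILD_IRQ declares the interrupt stub as an ordinary function and then defines its label in the inline assembly shown (the remainder of the asm body is elided above and not reproduced here). Assuming IRQ_NAME2 pastes the _interrupt suffix as in the Linux-derived original, a hypothetical BUILD_IRQ(0x20) would begin with:

void IRQ0x20_interrupt(void);   /* matches the "IRQ0x20_interrupt:" asm label */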